int i, rc = 0;
struct exec_domain *ed;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
SH_VLOG("shadow mode table op %lx %lx count %d",
pagetable_val(d->exec_domain[0]->arch.guest_table), /* XXX SMP */
struct pfn_info *page = &frame_table[mfn];
struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(pfn_valid(mfn));
#ifndef NDEBUG
l2_pgentry_t l2e;
unsigned long l1pfn, l1mfn;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(VALID_M2P(l2pfn));
perfc_incrc(shadow_out_of_sync_calls);
u32 found = 0, fixups, write_refs;
unsigned long prediction, predicted_gpfn, predicted_smfn;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(VALID_MFN(readonly_gmfn));
perfc_incrc(remove_write_access);
if ( unlikely(!shadow_mode_enabled(d)) )
return 0;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
perfc_incrc(remove_all_access);
for (i = 0; i < shadow_ht_buckets; i++)
int unshadow;
int changed;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
{
perfc_incrc(shadow_sync_all);
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
// First, remove all write permissions to the page tables
//
#define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
(PERDOMAIN_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
-#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
-#define shadow_lock(_d) do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
-#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
+/*
+ * For now we use the per-domain BIGLOCK rather than a shadow-specific lock.
+ * We usually have the BIGLOCK already acquired anyway, so this is unlikely
+ * to cause much unnecessary extra serialisation. Also it's a recursive
+ * lock, and there are some code paths containing nested shadow_lock().
+ * The #if 0'ed code below is therefore broken until such nesting is removed.
+ */
+#if 0
+#define shadow_lock_init(_d) \
+    spin_lock_init(&(_d)->arch.shadow_lock)
+#define shadow_lock_is_acquired(_d) \
+    spin_is_locked(&(_d)->arch.shadow_lock)
+#define shadow_lock(_d) \
+do { \
+    ASSERT(!shadow_lock_is_acquired(_d)); \
+    spin_lock(&(_d)->arch.shadow_lock); \
+} while (0)
+#define shadow_unlock(_d) \
+do { \
+    /* The lock must be held when we release it -- positive assert here \
+     * (the '!' form belongs in shadow_lock(), not shadow_unlock()). */ \
+    ASSERT(shadow_lock_is_acquired(_d)); \
+    spin_unlock(&(_d)->arch.shadow_lock); \
+} while (0)
+#else
+#define shadow_lock_init(_d) \
+    ((_d)->arch.shadow_nest = 0)
+/* NOTE(review): spin_is_locked() only says *someone* holds big_lock, not
+ * necessarily this CPU; the shadow_nest check narrows that, but this is
+ * not a strict ownership test -- confirm acceptable for ASSERT use. */
+#define shadow_lock_is_acquired(_d) \
+    (spin_is_locked(&(_d)->big_lock) && ((_d)->arch.shadow_nest != 0))
+#define shadow_lock(_d) \
+do { \
+    LOCK_BIGLOCK(_d); \
+    (_d)->arch.shadow_nest++; \
+} while (0)
+#define shadow_unlock(_d) \
+do { \
+    ASSERT(shadow_lock_is_acquired(_d)); \
+    (_d)->arch.shadow_nest--; \
+    UNLOCK_BIGLOCK(_d); \
+} while (0)
+#endif
#define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
unsigned long pfn;
int rc = 0;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(d->arch.shadow_dirty_bitmap != NULL);
if ( !VALID_MFN(mfn) )
? __gpfn_to_mfn(d, gpfn)
: INVALID_MFN);
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(gpfn == (gpfn & PGT_mfn_mask));
ASSERT(stype && !(stype & ~PGT_type_mask));
struct shadow_status *x;
u32 pttype = PGT_none, type;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(gpfn == (gpfn & PGT_mfn_mask));
perfc_incrc(shadow_max_type);
struct shadow_status *p, *x, *n, *head;
unsigned long key = gpfn | stype;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(!(gpfn & ~PGT_mfn_mask));
ASSERT(stype && !(stype & ~PGT_type_mask));
SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(shadow_lock_is_acquired(d));
ASSERT(shadow_mode_translate(d) || gpfn);
ASSERT(!(gpfn & ~PGT_mfn_mask));